to ask for DMA pages.
Signed-off-by: srparish@us.ibm.com
/* 2. Get a new contiguous memory extent. */
BUG_ON(HYPERVISOR_dom_mem_op(
- MEMOP_increase_reservation, &mfn, 1, order) != 1);
+ MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
/* 3. Map the new extent in place of old pages. */
for (i = 0; i < (1<<order); i++) {
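The hunk above packs two values into the hypercall's order argument: the extent order in the low byte, and the caller's maximum tolerable address width in bits 8-15. A guest-side sketch of the encoding (the MEMOP_ORDER macro is hypothetical, not part of this patch):

    /* Hypothetical helper: low byte = extent order, bits 8-15 = the widest
     * machine address the caller can handle (32 means below 4GB). */
    #define MEMOP_ORDER(order, address_bits) ((order) | ((address_bits) << 8))

    BUG_ON(HYPERVISOR_dom_mem_op(MEMOP_increase_reservation, &mfn, 1,
                                 MEMOP_ORDER(order, 32)) != 1);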
unsigned int order = get_order(max * PAGE_SIZE);
if ( (max & (max-1)) != 0 )
order--;
- while ( (page = alloc_domheap_pages(d, order)) == NULL )
+ while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
if ( order-- == 0 )
break;
return page;
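Existing callers pass 0 for the new flags argument and keep their old behaviour; a caller that genuinely needs memory below 4GB would pass the new flag instead. A hypothetical example:

    /* Hypothetical caller: one page guaranteed to come from the DMA zone
     * (machine address below 4GB). */
    struct pfn_info *pg = alloc_domheap_pages(d, 0, ALLOC_DOM_DMA);
    if ( pg == NULL )
        return -ENOMEM;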
mpt_size = 4*1024*1024;
for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
{
- if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER)) == NULL )
+ if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
panic("Not enough memory to bootstrap Xen.\n");
idle_pg_table_l2[l2_linear_offset(RDWR_MPT_VIRT_START) + i] =
l2e_from_page(pg, PAGE_HYPERVISOR | _PAGE_PSE);
*/
for ( i = 0; i < max_page; i += ((1UL << L2_PAGETABLE_SHIFT) / 8) )
{
- pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER);
+ pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0);
if ( pg == NULL )
panic("Not enough memory for m2p table\n");
map_pages_to_xen(
unsigned long *extent_list,
unsigned long start_extent,
unsigned int nr_extents,
- unsigned int extent_order)
+ unsigned int extent_order,
+ unsigned int flags)
{
struct pfn_info *page;
unsigned long i;
{
PREEMPT_CHECK(MEMOP_increase_reservation);
- if ( unlikely((page = alloc_domheap_pages(d, extent_order)) == NULL) )
+ if ( unlikely((page = alloc_domheap_pages(d, extent_order,
+ flags)) == NULL) )
{
DPRINTK("Could not allocate a frame\n");
return i;
{
struct domain *d;
unsigned long rc, start_extent;
+ unsigned int address_bits_order;
/* Extract @start_extent from @op. */
start_extent = op >> START_EXTENT_SHIFT;
op &= (1 << START_EXTENT_SHIFT) - 1;
+ /* Separate extent_order and address_bits_order. */
+ address_bits_order = (extent_order >> 8) & 0xff;
+ extent_order &= 0xff;
+
if ( unlikely(start_extent > nr_extents) )
return -EINVAL;
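A worked example of the decode: a guest asking for a 2^9-page extent below 4GB passes 9 | (32<<8) = 0x2009; (0x2009 >> 8) & 0xff recovers address_bits_order = 32, and 0x2009 & 0xff recovers extent_order = 9.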
{
case MEMOP_increase_reservation:
rc = alloc_dom_mem(
- d, extent_list, start_extent, nr_extents, extent_order);
+ d, extent_list, start_extent, nr_extents, extent_order,
+ (address_bits_order && (address_bits_order <= 32)) ? ALLOC_DOM_DMA : 0);
break;
case MEMOP_decrease_reservation:
rc = free_dom_mem(
#define MEMZONE_XEN 0
#define MEMZONE_DOM 1
-#define NR_ZONES 2
+#define MEMZONE_DMADOM 2
+#define NR_ZONES 3
+
+#define MAX_DMADOM_PFN 0xFFFFF
+#define pfn_dom_zone_type(_pfn) \
+ (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
/* Up to 2^20 pages can be allocated at once. */
#define MAX_ORDER 20
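The boundary arithmetic: with 4KB pages, (MAX_DMADOM_PFN + 1) << PAGE_SHIFT = 2^20 * 2^12 = 2^32, i.e. exactly 4GB. A compile-time sanity check along these lines would document that (a sketch only; it assumes PAGE_SHIFT == 12 and a Linux-style BUILD_BUG_ON macro, neither of which this patch touches):

    /* Sketch: assert MAX_DMADOM_PFN marks the last frame below 4GB. */
    BUILD_BUG_ON((((unsigned long long)MAX_DMADOM_PFN + 1) << PAGE_SHIFT) !=
                 (1ULL << 32));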
if ( next_free )
map_alloc(i+1, 1); /* prevent merging in free_heap_pages() */
if ( curr_free )
- free_heap_pages(MEMZONE_DOM, pfn_to_page(i), 0);
+ free_heap_pages(pfn_dom_zone_type(i), pfn_to_page(i), 0);
}
}
{
ASSERT(!in_irq());
- ps = round_pgup(ps);
- pe = round_pgdown(pe);
+ ps = round_pgup(ps) >> PAGE_SHIFT;
+ pe = round_pgdown(pe) >> PAGE_SHIFT;
- init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+ if ( (ps <= MAX_DMADOM_PFN) && (pe > (MAX_DMADOM_PFN + 1)) )
+ {
+     init_heap_pages(MEMZONE_DMADOM, pfn_to_page(ps),
+                     MAX_DMADOM_PFN + 1 - ps);
+     init_heap_pages(MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN + 1),
+                     pe - (MAX_DMADOM_PFN + 1));
+ }
+ else
+     init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
}
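For example, with 4KB pages a RAM block spanning 2GB to 6GB arrives as pfns 0x80000 to 0x180000; pfns 0x80000 through 0xFFFFF (inclusive) seed MEMZONE_DMADOM, and pfns 0x100000 through 0x17FFFF seed MEMZONE_DOM, matching what pfn_dom_zone_type() will report when those pages are freed.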
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
+struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order,
+ unsigned int flags)
{
struct pfn_info *pg;
cpumask_t mask;
ASSERT(!in_irq());
- if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
- return NULL;
+ pg = NULL;
+ if ( !(flags & ALLOC_DOM_DMA) )
+     pg = alloc_heap_pages(MEMZONE_DOM, order);
+ if ( pg == NULL )
+ {
+ if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
+ return NULL;
+ }
mask = pg->u.free.cpumask;
tlbflush_filter(mask, pg->tlbflush_timestamp);
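Note the fallback order above: a non-DMA request tries MEMZONE_DOM first and dips into MEMZONE_DMADOM only when the general heap is exhausted, so below-4GB memory stays in reserve for DMA-constrained callers as long as possible. Because either zone may end up satisfying a request, the free paths below recompute the zone from the frame number rather than hard-coding MEMZONE_DOM. A minimal sketch of that round trip, using the names from this patch:

    /* Sketch: no per-page zone tag is needed; pfn_dom_zone_type()
     * recovers the zone a page must be returned to. */
    struct pfn_info *pg = alloc_domheap_pages(NULL, 0, 0);
    if ( pg != NULL )
        free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0);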
DPRINTK("...or the domain is dying (%d)\n",
!!test_bit(_DOMF_dying, &d->domain_flags));
spin_unlock(&d->page_alloc_lock);
- free_heap_pages(MEMZONE_DOM, pg, order);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
return NULL;
}
if ( likely(!test_bit(_DOMF_dying, &d->domain_flags)) )
{
- free_heap_pages(MEMZONE_DOM, pg, order);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
}
else
{
else
{
/* Freeing an anonymous domain-heap page. */
- free_heap_pages(MEMZONE_DOM, pg, order);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, order);
drop_dom_ref = 0;
}
unsigned long avail_domheap_pages(void)
{
- return avail[MEMZONE_DOM];
+ return avail[MEMZONE_DOM] + avail[MEMZONE_DMADOM];
}
p = map_domain_page(page_to_pfn(pg));
clear_page(p);
unmap_domain_page(p);
- free_heap_pages(MEMZONE_DOM, pg, 0);
+ free_heap_pages(pfn_dom_zone_type(page_to_pfn(pg)), pg, 0);
}
} while ( (NOW() - start) < MILLISECS(1) );
}
/* Domain suballocator. These functions are *not* interrupt-safe.*/
void init_domheap_pages(physaddr_t ps, physaddr_t pe);
-struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order);
+struct pfn_info *alloc_domheap_pages(
+ struct domain *d, unsigned int order, unsigned int flags);
void free_domheap_pages(struct pfn_info *pg, unsigned int order);
unsigned long avail_domheap_pages(void);
-#define alloc_domheap_page(d) (alloc_domheap_pages(d,0))
+#define alloc_domheap_page(d) (alloc_domheap_pages(d,0,0))
#define free_domheap_page(p) (free_domheap_pages(p,0))
+#define ALLOC_DOM_DMA 1
+
/* Automatic page scrubbing for dead domains. */
extern struct list_head page_scrub_list;
#define page_scrub_schedule_work() \